PyTorch Learning Notes: Implementing an n-gram Model

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

CONTEXT_SIZE = 2            # number of context words, i.e. the window size
EMBEDDING_DIM = 100
test_sentence = "When forty winters shall besiege thy brow,And dig deep trenches in thy beauty's field,Thy youth's proud livery so gazed on now,Will be a totter'd weed of small worth held:Then being asked, where all thy beauty lies,Where all the treasure of thy lusty days;To say, within thine own deep sunken eyes,Were an all-eating shame, and thriftless praise.How much more praise deserv'd thy beauty's use,If thou couldst answer 'This fair child of mineShall sum my count, and make my old excuse,'Proving his beauty by succession thine!This were to be new made when thou art old,And see thy blood warm when thou feel'st it cold.".split()


vocb = set(test_sentence)       # deduplicate words to build the vocabulary
word2id = {word: i for i, word in enumerate(vocb)}
id2word = {word2id[word]: word for word in word2id}
# define the model
class NgramModel(nn.Module):
    def __init__(self, vocb_size, context_size, n_dim):
        super().__init__()
        self.n_word = vocb_size
        self.embedding = nn.Embedding(self.n_word, n_dim)
        self.linear1 = nn.Linear(context_size * n_dim, 128)
        self.linear2 = nn.Linear(128, self.n_word)

    def forward(self, x):
        # step 1: look up the embeddings of the context words, e.g. two words give a (2, 100) tensor
        emb = self.embedding(x)
        # step 2: flatten the word embeddings into a single row, e.g. (1, 200)
        emb = emb.view(1, -1)
        # step 3: pass through the first linear layer, apply ReLU, then the second linear layer
        out = self.linear1(emb)
        out = F.relu(out)
        out = self.linear2(out)
        # the output dimension equals the vocabulary size, so this can be viewed as a classification problem;
        # to pick the most probable next word, take the log softmax over that dimension
        log_prob = F.log_softmax(out, dim=1)
        return log_prob

ngrammodel = NgramModel(len(word2id), CONTEXT_SIZE, EMBEDDING_DIM)
criterion = nn.NLLLoss()
optimizer = optim.SGD(ngrammodel.parameters(), lr=1e-3)

trigram = [((test_sentence[i], test_sentence[i+1]), test_sentence[i+2])
           for i in range(len(test_sentence)-2)]

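# (Illustration added for this write-up, not in the original post.) Each training example pairs
# a two-word context with the word that follows it, e.g. the first entries of trigram are:
#   (('When', 'forty'), 'winters'), (('forty', 'winters'), 'shall'), ...
# print(trigram[:3])
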
for epoch in range(100):
    print('epoch: {}'.format(epoch+1))
    print('*'*10)
    running_loss = 0
    for data in trigram:
        # 'word' holds the two context words preceding the target, 'label' holds the word to predict
        word, label = data
        word = torch.LongTensor([word2id[e] for e in word])
        label = torch.LongTensor([word2id[label]])
        # forward
        out = ngrammodel(word)
        loss = criterion(out, label)
        running_loss += loss.item()
        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print('loss: {:.6f}'.format(running_loss/len(trigram)))

# predict
word, label = trigram[3]
word = torch.LongTensor([word2id[i] for i in word])
out = ngrammodel(word)
_, predict_label = torch.max(out, 1)
predict_word = id2word[predict_label.item()]
print('real word is {}, predict word is {}'.format(label, predict_word))
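
As a small extension (not part of the original post), the sketch below wraps the prediction step in a reusable helper that returns the most probable next word for any two-word context drawn from the training vocabulary. The name predict_next is my own; words outside word2id will raise a KeyError.

def predict_next(context_words):
    # map the two context words to their ids; out-of-vocabulary words raise KeyError
    ids = torch.LongTensor([word2id[w] for w in context_words])
    with torch.no_grad():                    # no gradients needed at inference time
        log_prob = ngrammodel(ids)           # shape (1, vocab_size)
    _, idx = torch.max(log_prob, 1)
    return id2word[idx.item()]

# e.g. the opening bigram of the training text; after enough training this should lean towards 'winters'
print(predict_next(('When', 'forty')))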

 
